In [32]:
import json
from math import sin, cos, tan, pi, radians
import numpy as np
In [69]:
with open('isd.isd', 'r') as f:
    isd = json.load(f)
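The cells below pull values out of the ISD by key. As a quick, purely illustrative sanity check, we can confirm that the keys this notebook relies on are present:
In [ ]:
# Keys used later in this notebook; this just confirms they exist in the ISD.
for key in ('focal_length', 'nsamples', 'omega', 'phi', 'kappa',
            'x_sensor_origin', 'y_sensor_origin', 'z_sensor_origin',
            'transx', 'transy'):
    print(key, key in isd)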
For a framing camera the interior orientation (intrinsic matrix) requires (at a minimum) the focal length and the location of the principal point (optical center).
The example that we have been working on looks like a pinhole ground-to-image projection, defined as:
$$\begin{bmatrix} w \cdot u \\ w \cdot v \\ w \end{bmatrix} = \mathbf{K} \begin{bmatrix} \mathbf{Rt} \end{bmatrix} \begin{bmatrix} X\\ Y\\ Z\\ 1 \end{bmatrix} $$or
$$\begin{bmatrix} w \cdot u \\ w \cdot v \\ w \end{bmatrix} = \begin{bmatrix} f & s & u_{0} \\ 0 & \alpha f & v_{0} \\ 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} r_{11} & r_{12} & r_{13} & t_{x} \\ r_{21} & r_{22} & r_{23} & t_{y} \\ r_{31} & r_{32} & r_{33} & t_{z} \\ \end{bmatrix} \begin{bmatrix} X\\ Y\\ Z\\ 1 \end{bmatrix} $$K is the intrinsic matrix (interior orientation), R is the rotation portion of the extrinsic matrix (exterior orientation), and t is the translation. In the intrinsic matrix, $\alpha$ (the pixel aspect ratio) and $s$ (the skew) are often assumed to be one and zero, respectively. $f$ is the focal length (in pixels) and ($u_{0}, v_{0}$) is the optical center (principal point).
The second resource below suggests that t can be thought of as the world origin in camera coordinates.
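To make the projection concrete, here is a minimal sketch of the pinhole model above using toy values (the focal length, principal point, and pose below are illustrative, not from the ISD):
In [ ]:
# Toy intrinsic matrix with s = 0 and alpha = 1
f_px, u0, v0 = 1000.0, 512.5, 512.5
K_toy = np.array([[f_px, 0, u0],
                  [0, f_px, v0],
                  [0, 0, 1]])
# Toy extrinsic matrix [R | t]: identity rotation, camera 1000 units from the origin
Rt = np.hstack((np.eye(3), np.array([[0.0], [0.0], [1000.0]])))
ground = np.array([10.0, 20.0, 0.0, 1.0])  # homogeneous object point
wu, wv, w = K_toy.dot(Rt).dot(ground)
print(wu / w, wv / w)  # divide out w to get pixel coordinates (u, v)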
In [162]:
# 512.5, 512.5 is the principal point: the image width/height in pixels divided by 2
def create_intrinsic_matrix(focal_length, image_width, sensor_width=14.4, skew=0, pixel_aspect=1):
    focal_pixels = (focal_length / sensor_width) * image_width  # From the IK - how do we get 14.4 automatically?
    print('These should be equal.', focal_pixels * sensor_width / image_width, focal_length)
    intrinsic_matrix = np.zeros((3, 3))
    intrinsic_matrix[0, 0] = focal_pixels
    intrinsic_matrix[0, 1] = skew
    intrinsic_matrix[1, 1] = pixel_aspect * focal_pixels
    intrinsic_matrix[:, 2] = [512.5, 512.5, 1]
    return intrinsic_matrix
K = create_intrinsic_matrix(isd['focal_length'], isd['nsamples'])
print(K)
Here we define:
$$L = \begin{bmatrix} X_{L}\\ Y_{L}\\ Z_{L} \end{bmatrix} $$
In [8]:
L = np.array([isd['x_sensor_origin'],
isd['y_sensor_origin'],
isd['z_sensor_origin']])
L
The collinearity condition can be written in vector form as:
$$\begin{bmatrix} x \\ y \\ -f \end{bmatrix} = k\mathbf{M} \begin{bmatrix} X - X_{L} \\ Y - Y_{L} \\ Z - Z_{L} \end{bmatrix} $$where $(x, y, -f)$ are in image coordinates, $k$ is a scale factor, $\mathbf{M}$ is a 3x3 rotation matrix, and $(X,Y,Z)$ represent the object point.
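Dividing the first two components of this vector equation by the third eliminates $k$ and yields the scalar collinearity equations implemented below. A small sketch using the Mikhail test values that appear later in this notebook:
In [ ]:
# Vector form of collinearity; values match the Mikhail example below.
M_toy = opk_to_rotation(radians(2), radians(5), radians(15))
num = M_toy.dot(np.array([5100., 9800., 100.]) - np.array([5000., 10000., 2000.]))
# k * [x, y, -f] is proportional to num, so dividing by num[2] removes k
print(-152.4 * num[0] / num[2], -152.4 * num[1] / num[2])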
In [22]:
object_point = np.array([1116890, -1604470, 1459570])
# Discard scale momentarily.
scale = 1
# Compute M
o = isd['omega']
p = isd['phi']
k = isd['kappa']
M = opk_to_rotation(o, p, k)
xyz = M.dot(L)
# And now reverse: because M is orthogonal, its transpose is its inverse
L0 = (M.T).dot(xyz)
print(L, L0)  # These should be equal.
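A tighter check than eyeballing the printed arrays (this relies only on M being orthogonal):
In [ ]:
print(np.allclose(L, L0))  # expect True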
In [66]:
def opk_to_rotation(o, p, k):
    """
    Convert from Omega, Phi, Kappa to a 3x3 rotation matrix

    Parameters
    ----------
    o : float
        Omega in radians
    p : float
        Phi in radians
    k : float
        Kappa in radians

    Returns
    -------
     : ndarray
       (3,3) rotation array
    """
    om = np.empty((3,3))
    om[:,0] = [1, 0, 0]
    om[:,1] = [0, cos(o), -sin(o)]
    om[:,2] = [0, sin(o), cos(o)]

    pm = np.empty((3,3))
    pm[:,0] = [cos(p), 0, sin(p)]
    pm[:,1] = [0, 1, 0]
    pm[:,2] = [-sin(p), 0, cos(p)]

    km = np.empty((3,3))
    km[:,0] = [cos(k), -sin(k), 0]
    km[:,1] = [sin(k), cos(k), 0]
    km[:,2] = [0, 0, 1]

    return km.dot(pm).dot(om)

def collinearity(f, M, camera_position, ground_position, principal_point=(0, 0)):
    XL, YL, ZL = camera_position
    X, Y, Z = ground_position
    x0, y0 = principal_point

    x = (-f * ((M[0,0] * (X - XL) + M[0,1] * (Y - YL) + M[0,2] * (Z - ZL)) /
               (M[2,0] * (X - XL) + M[2,1] * (Y - YL) + M[2,2] * (Z - ZL)))) + x0
    y = (-f * ((M[1,0] * (X - XL) + M[1,1] * (Y - YL) + M[1,2] * (Z - ZL)) /
               (M[2,0] * (X - XL) + M[2,1] * (Y - YL) + M[2,2] * (Z - ZL)))) + y0

    return x, y, -f

def collinearity_inv(f, M, camera_position, pixel_position, elevation, principal_point=(0, 0)):
    XL, YL, ZL = camera_position
    x, y = pixel_position
    Z = elevation
    x0, y0 = principal_point

    X = (Z - ZL) * ((M[0,0] * (x - x0) + M[1,0] * (y - y0) + M[2,0] * (-f)) /
                    (M[0,2] * (x - x0) + M[1,2] * (y - y0) + M[2,2] * (-f))) + XL
    Y = (Z - ZL) * ((M[0,1] * (x - x0) + M[1,1] * (y - y0) + M[2,1] * (-f)) /
                    (M[0,2] * (x - x0) + M[1,2] * (y - y0) + M[2,2] * (-f))) + YL

    return X, Y
o = radians(2)
p = radians(5)
k = radians(15)
XL = 5000
YL = 10000
ZL = 2000
# Interior Orientation
x0 = 0.015 # mm
y0 = -0.02 # mm
f = 152.4 # mm
# Ground Points
X = 5100
Y = 9800
Z = 100
M = opk_to_rotation(o,p,k) # Distortion model here?
# This is correct as per Mikhail
x, y, _ = collinearity(f, M, [XL, YL, ZL], [X, Y, Z], [0,0])
print(x, y)
x, y, _ = collinearity(f, M, [XL, YL, ZL], [X, Y, Z], [x0,y0])
print(x,y)
# And now the inverse, find X, Y
Z = 500 # Provided by Mikhail - his random number
print(collinearity_inv(f, M, [XL, YL, ZL], [x, y], Z, (x0, y0)))
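Since everything downstream depends on opk_to_rotation returning a proper rotation, here is a quick property check (not part of the original workflow): the matrix should be orthogonal with determinant +1.
In [ ]:
M_chk = opk_to_rotation(o, p, k)
print(np.allclose(M_chk.dot(M_chk.T), np.eye(3)))  # orthogonal: expect True
print(np.isclose(np.linalg.det(M_chk), 1.0))       # proper rotation: expect True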
In [70]:
# First from pixel to ground:
f = isd['focal_length']
XL = isd['x_sensor_origin']
YL = isd['y_sensor_origin']
ZL = isd['z_sensor_origin']
# We know that the pixel size is 0.014 x 0.014 mm per pixel (14.4 mm / 1024 pixels)
pixel_size = 0.014
x0 = 512 * pixel_size  # Convert from pixel based principal point to metric principal point
y0 = 512 * pixel_size
M = opk_to_rotation(o,p,k)
# This is image to ground
X, Y = collinearity_inv(f, M, [XL, YL, ZL], [10.2,5.1], 1000, (x0, y0))
print('Ground: ', X, Y, 1000) # Arbitrary 1000m elevation - here is where iteration with intersection is needed.
# Now reverse! This is ground to image
# These are in mm and need to convert to pixels
x, y, f = collinearity(f, M, [XL, YL, ZL], [X, Y, 1000], [x0,y0])
print(x,y)
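The x, y above are focal plane coordinates in mm. A hedged sketch of the conversion back to pixels using the 0.014 mm pixel pitch (the axis and sign conventions here are assumptions and may need flipping for this sensor):
In [ ]:
sample = x / pixel_size  # x already includes the metric principal point offset x0
line = y / pixel_size
print(sample, line)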
In [1]:
def opk_to_rotation(o, p, k):
    """
    Convert from Omega, Phi, Kappa to a 3x3 rotation matrix
    """
    om = np.empty((3,3))
    om[:,0] = [1, 0, 0]
    om[:,1] = [0, cos(o), -sin(o)]
    om[:,2] = [0, sin(o), cos(o)]

    pm = np.empty((3,3))
    pm[:,0] = [cos(p), 0, sin(p)]
    pm[:,1] = [0, 1, 0]
    pm[:,2] = [-sin(p), 0, cos(p)]

    km = np.empty((3,3))
    km[:,0] = [cos(k), -sin(k), 0]
    km[:,1] = [sin(k), cos(k), 0]
    km[:,2] = [0, 0, 1]

    return km.dot(pm).dot(om)
# This makes a great test case (Mikhail p.95 has the rotation matrix.)
o = isd['omega']
p = isd['phi']
k = isd['kappa']
# This is R, but we need t to have a proper augmented matrix
R = np.empty((3,4))
R[:,:3] = opk_to_rotation(o, p, k)
RC = np.empty((4,4))
RC[:3,:3] = opk_to_rotation(o, p, k)
RC[:3,-1] = [isd['x_sensor_origin'],
isd['y_sensor_origin'],
isd['z_sensor_origin']]
RC[-1] = [0,0,0,1]
invRC = np.linalg.inv(RC)[:3, :]
print(invRC)
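# Check (a general identity, not specific to this ISD): for the augmented
# matrix RC = [[R, C], [0, 1]], the inverse restricted to its top three rows
# should equal [R.T | -R.T @ C], i.e. the world-to-camera transform.
Rm = RC[:3, :3]
C = RC[:3, -1]
print(np.allclose(invRC, np.hstack((Rm.T, (-Rm.T.dot(C)).reshape(3, 1)))))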
def setfocalrot(x, y, z):
    # This is a focal plane rotation matrix that is flipping the camera vertically (I think)
    # x, y, z is the position of the spacecraft
    c = np.zeros((3,4))
    c[0,0] = 1
    c[1,1] = -1
    c[2,2] = -1
    c[:,3] = [x, y, z]
    return c
# Arguments are spacecraft position: x, y, z
c = setfocalrot(isd['x_sensor_origin'],
isd['y_sensor_origin'],
isd['z_sensor_origin'])
def pixelloc(K, R, t, tx, ty):
    res = K.dot(R).dot(t)
    res[0] /= res[-1]
    res[1] /= res[-1]
    res[2] /= res[-1]
    # TODO: the mapping from focal plane to pixel space using tx, ty
    # (the transx/transy coefficients) is not applied yet.
    return res[:2]
# pixel position on the surface: x,y,z,1
position = np.array([1116890,
-1604470,
1459570,
1])
# The above should be (ballpark) 90 and 110 I believe
"""position = np.array([1131980,
-1597990,
1455060,
1])"""
ploc = pixelloc(K, invRC, position, isd['transx'][1], isd['transy'][2])
ploc
In [130]:
def ground_to_image(ground, precision):
    # Placeholder iteration scheme: g2i does not yet report a residual,
    # so this loop runs once; the structure is kept for future refinement.
    i = 0
    current_precision = precision + 1  # force at least one pass
    while current_precision > precision:
        line, sample = g2i(ground)
        current_precision = precision  # TODO: compute an actual residual
        i += 1
        if i > 10:
            break
    return line, sample

def calc_rotation_matrix(o, p, k):
    R = np.empty((3,4))
    R[:,:3] = opk_to_rotation(o, p, k)
    R[:,-1] = [0, 0, 1]
    return R

def g2i(ground):
    gx, gy, gz = ground
    r = calc_rotation_matrix(o, p, k)
    # This does not account for adjustments - how?
    # The expressions below follow the collinearity equations above;
    # the original left them blank, so treat them as an assumption.
    lnum = r[1,0] * (gx - XL) + r[1,1] * (gy - YL) + r[1,2] * (gz - ZL)
    snum = r[0,0] * (gx - XL) + r[0,1] * (gy - YL) + r[0,2] * (gz - ZL)
    denom = r[2,0] * (gx - XL) + r[2,1] * (gy - YL) + r[2,2] * (gz - ZL)
    return -f * lnum / denom, -f * snum / denom